Loading libraries:
library(dplyr)
library(ggplot2)
library(gridExtra)
library(grid)
library(plyr)
#install.packages('cowplot')
library(cowplot)
library(tibble)
library(flextable)
#install.packages('nortest')
library(nortest)
#install.packages('PerformanceAnalytics')
library(PerformanceAnalytics)
library(corrplot)
library(RColorBrewer)
Load data:
# Read the tidy AGP data. NOTE(review): these are absolute, home-relative
# paths, so the script only runs on the machine where the files live —
# consider here::here() or a relative path for portability.
AGP <- read.csv("~/Desktop/master_project/project_analysis/01_tidy_data/AGP_all.csv", header = TRUE, sep = ",")
all_healthy <- read.csv("~/Desktop/master_project/project_analysis/01_tidy_data/AGP_healthy.csv", header = TRUE, sep = ",")
# Sample counts: full dataset vs. the healthy subset analysed below.
nrow(AGP)
## [1] 10399
nrow(all_healthy)
## [1] 847
Let’s define a vector with the names of the alpha diversity metrics that are going to be analysed:
# Alpha diversity metrics analysed throughout this document; their order
# here fixes the panel and column order in every figure/table below.
metric <- c("shannon_entropy", "chao1", "menhinick", "margalef", "fisher_alpha", "simpson", "gini_index", "strong", "pielou_evenness", "faith_pd" )
Generate a vector of qualitative-palette colors for the histograms (all qualitative RColorBrewer palettes concatenated; the first 10 entries are used, one per metric):
# Pool every colour from the qualitative RColorBrewer palettes; only the
# first length(metric) entries are actually used (one fill per metric).
qual_col_pals <- brewer.pal.info[brewer.pal.info$category == 'qual',]
colors <- unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
# One histogram with a density overlay per alpha diversity metric.
histo <- vector('list', length(metric))
for (i in seq_along(metric)){
  histo[[i]] <- all_healthy %>% ggplot(aes(x = .data[[metric[i]]])) +
    # after_stat(density) replaces the `..density..` notation deprecated
    # in ggplot2 3.4.0 (same behaviour: histogram on the density scale).
    geom_histogram(aes(y = after_stat(density)), colour="black", fill="white", bins=30)+
    geom_density(alpha=.2, fill=colors[i]) +
    xlab(label = metric[i]) +
    ylab(label = "density")
}
# `grobs =` takes the whole list, avoiding the ten hand-written histo[[k]].
grid.arrange(grobs = histo, ncol=3, top = textGrob("Distributions of 1) Shannon's index 2) Chao1 3) Menhinick's index \n4) Margalef's index 5) Fisher's index 6) Simpson 7) Gini index 8) Strong's index 9) Pielou's evenness \nand 10) Faith's PD in healthy dataset", gp=gpar(fontsize=10,font=2)))
## Warning: The dot-dot notation (`..density..`) was deprecated in ggplot2 3.4.0.
## ℹ Please use `after_stat(density)` instead.
# Kolmogorov-Smirnov test of each metric against the normal distribution.
# NOTE(review): ks.test(x, 'pnorm') compares against N(0, 1) without
# standardising x, so these results mostly reflect location/scale rather
# than shape — consider ks.test(scale(x), 'pnorm') or nortest::lillie.test
# if normality of shape is the actual question.
pval_ks <- vector("list", length(metric))
stat_ks <- vector("list", length(metric))
for (i in seq_along(metric)){
  # Run the test once per metric; the original called ks.test twice to
  # pull the statistic and the p-value separately.
  ks <- ks.test(all_healthy[[metric[i]]], 'pnorm')
  pval_ks[[i]] <- ks$p.value
  stat_ks[[i]] <- ks$statistic
}
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
## Warning in ks.test(all_healthy[[metric[i]]], "pnorm"): ties should not be
## present for the Kolmogorov-Smirnov test
# Flatten the per-metric results into plain numeric vectors.
# Fix: the original `lapply(stat_ks, unname) %>% unlist(stat_ks)` piped
# the list into unlist() *and* passed stat_ks again as the positional
# `recursive` argument; unlist(lapply(...)) is what was intended.
stat_ks <- unlist(lapply(stat_ks, unname))
pval_ks <- unlist(pval_ks)
data.frame(metric=metric, statistic=stat_ks, p.value=pval_ks) %>% flextable() %>%
add_header_lines(values = "Results of the Kolmogorov-Smirnov Test of distribution normality")
Results of the Kolmogorov-Smirnov Test of distribution normality | ||
metric | statistic | p.value |
shannon_entropy | 0.9255021 | 0 |
chao1 | 1.0000000 | 0 |
menhinick | 0.8476195 | 0 |
margalef | 0.9939637 | 0 |
fisher_alpha | 0.9970833 | 0 |
simpson | 0.6427992 | 0 |
gini_index | 0.8410187 | 0 |
strong | 0.6807663 | 0 |
pielou_evenness | 0.5750921 | 0 |
faith_pd | 0.9999364 | 0 |
Anderson-Darling Test in R (Quick Normality Check)
# Anderson-Darling normality test for each alpha diversity metric.
pval_ad <- vector("list", length(metric))
stat_ad <- vector("list", length(metric))
for (i in seq_along(metric)){
  # Single ad.test() call per metric (the original ran it twice).
  ad <- ad.test(all_healthy[[metric[i]]])
  pval_ad[[i]] <- ad$p.value
  stat_ad[[i]] <- ad$statistic
}
# unlist(lapply(...)) — the original piped into unlist(stat_ad), passing
# the list a second time as the `recursive` argument.
stat_ad <- unlist(lapply(stat_ad, unname))
pval_ad <- unlist(pval_ad)
data.frame(metric=metric, statistic=stat_ad, p.value=pval_ad) %>% flextable() %>%
add_header_lines(values = "Results of the Anderson-Darling Test of distribution normality")
Results of the Anderson-Darling Test of distribution normality | ||
metric | statistic | p.value |
shannon_entropy | 14.057622 | 0.0000000000000000000000037 |
chao1 | 2.910347 | 0.0000002559166964639393842 |
menhinick | 1.131161 | 0.0058226815171156590111856 |
margalef | 1.131161 | 0.0058226815171156590111856 |
fisher_alpha | 5.549019 | 0.0000000000001098062220114 |
simpson | 72.927846 | 0.0000000000000000000000037 |
gini_index | 6.105243 | 0.0000000000000051613624378 |
strong | 13.867885 | 0.0000000000000000000000037 |
pielou_evenness | 27.148388 | 0.0000000000000000000000037 |
faith_pd | 1.708195 | 0.0002220340277928300174760 |
Closest to normal distribution (smallest AD statistics in the table above): margalef -> p-value: 0.0058 menhinick -> p-value: 0.0058 faith_pd -> p-value: 0.00022
Most of the variables are categorical.
First, let's check correlations between the alpha diversity metrics
# Pairwise Pearson correlations between the alpha diversity metrics.
library(corrplot)
# Columns 2-11 are assumed to hold the ten alpha diversity metrics —
# TODO confirm against names(all_healthy).
metrics <- all_healthy[,2:11]
#metrics <- all_healthy[,2:9]
cor_matrix <- cor(metrics)
# Upper triangle only; variables ordered by hierarchical clustering.
corrplot(cor_matrix, type = "upper", order = "hclust",
tl.col = "black", tl.srt = 45)
# Scatterplot matrix with histograms and correlation coefficients
# (PerformanceAnalytics).
chart.Correlation(all_healthy[, 2:11], histogram=TRUE, pch=19)
#chart.Correlation(all_healthy[, 2:9], histogram=TRUE, pch=19)
For numerical variables we can perform simple Pearson’s correlation coefficient:
# Identify numeric columns
num_cols <- unlist(lapply(all_healthy, is.numeric))
# Subset numeric columns of data
data_num <- all_healthy[ , num_cols]
# Exclude birth_year from the correlation analysis.
data_num$birth_year <- NULL
# Correlate the alpha metrics (assumed columns 1-10 of data_num) against
# the remaining host covariates (assumed columns 11-14: age, BMI, height,
# weight per the table below) — TODO confirm these hard-coded positions
# against names(data_num).
# Save results as data frame
correlation_num <- as.data.frame(cor(data_num[-c(11:14)], data_num[-c(1:10)]))
#correlation_num <- as.data.frame(cor(data_num[-c(9:13)], data_num[-c(1:8)]))
correlation_num %>% tibble::rownames_to_column() %>% flextable()
rowname | host_age | host_body_mass_index | host_height | host_weight |
shannon_entropy | -0.06253494 | -0.07459529 | -0.03116381 | -0.07072838 |
chao1 | 0.07626194 | -0.10281032 | -0.02668104 | -0.08601793 |
menhinick | 0.06829021 | -0.10226411 | -0.01419789 | -0.07668103 |
margalef | 0.06829021 | -0.10226411 | -0.01419789 | -0.07668103 |
fisher_alpha | 0.07493539 | -0.10448474 | -0.01526414 | -0.07869252 |
gini_index | -0.01698390 | 0.10864442 | 0.01470289 | 0.08014057 |
strong | 0.09753254 | 0.06851877 | 0.01875083 | 0.05798898 |
simpson | -0.10344271 | -0.02855986 | -0.05057252 | -0.05508757 |
faith_pd | 0.06594318 | -0.10807043 | -0.02746907 | -0.08974110 |
pielou_evenness | -0.10387956 | -0.05568768 | -0.03512946 | -0.06151643 |
# Plot correlation
# Same correlation matrix as above but transposed (host covariates as
# rows, alpha metrics as columns) to fit the corrplot layout.
cor_matrix <- cor(data_num[-c(1:10)], data_num[-c(11:14)])
#cor_matrix <- cor(data_num[-c(1:8)], data_num[-c(9:13)])
corrplot(cor_matrix, tl.col = "black", tl.srt = 45)
We can use linear regression models. From a simple regression model, we can derive the coefficient of determination.
For a simple case, where continuous dependent variable is Y and there is just one continuous variable X, the numeric value of the r-squared is equal to the square of the correlation of X and Y.
Now if we have one continuous variable and one categorical variable, it is impossible to calculate the correlation between them. However, we can use regression to come up with a numeric value that can be treated similarly as correlation. For that, we need to make a regression model taking the continuous variable as dependent variable and categorical variable as independent variable. The model gives a r-sq value. We need to take the square root of that r-sq value.
source: source link
We can use linear regression which is used when the dependent variable is continuous. The predictors can be anything (nominal or ordinal categorical, or continuous, or a mix).
source: source link
As a result of building a linear model we get multiple R: The multiple correlation coefficient between three or more variables. Multiple R-Squared: This is calculated as (Multiple R)2 and it represents the proportion of the variance in the response variable of a regression model that can be explained by the predictor variables. This value ranges from 0 to 1.
source: source link
More info about the interpretation: source: source link
# Extract categorical variables from all_healthy data frame
cat_cols <- vapply(all_healthy, is.character, logical(1))
# Subset categorical columns of data
data_cat <- all_healthy[ , cat_cols]
# sample_id is an identifier, not a predictor.
data_cat$sample_id <- NULL
#head(data_cat)
#ncol(data_cat)
# For every categorical variable, regress each alpha diversity metric
# (columns 2-11 of all_healthy) on it and keep sqrt(R^2) as a
# correlation-like measure of association.
correlation_cat <- data.frame(variable = colnames(data_cat))
for (j in 2:11){
#for (j in 2:9){
  # sqrt(R^2) of metric_j ~ category_i for every categorical column i.
  r_vals <- vapply(seq_len(ncol(data_cat)), function(i) {
    sqrt(summary(lm(all_healthy[, j] ~ data_cat[, i]))$r.squared)
  }, numeric(1))
  # One column per metric, named after it — replaces the original
  # grow-by-cbind pattern (which copies the frame on every iteration).
  correlation_cat[[colnames(all_healthy)[j]]] <- r_vals
}
# Worked example: does age category explain Shannon entropy?
modelXY <- lm(all_healthy$shannon_entropy ~ data_cat$age_cat)
## model summary
sumryXY <- summary(modelXY)
sumryXY
##
## Call:
## lm(formula = all_healthy$shannon_entropy ~ data_cat$age_cat)
##
## Residuals:
## Min 1Q Median 3Q Max
## -4.0388 -0.7576 0.3022 0.9088 2.3596
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 4.5630 0.1002 45.528 <2e-16 ***
## data_cat$age_cat30s -0.1051 0.1357 -0.774 0.4389
## data_cat$age_cat40s -0.3174 0.1386 -2.290 0.0223 *
## data_cat$age_cat50s -0.1163 0.1346 -0.864 0.3880
## data_cat$age_cat60s -0.2294 0.1460 -1.571 0.1165
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 1.252 on 842 degrees of freedom
## Multiple R-squared: 0.007354, Adjusted R-squared: 0.002638
## F-statistic: 1.56 on 4 and 842 DF, p-value: 0.1831
## r-sq of model
# Proportion of Shannon-entropy variance explained by age category
# (multiple R-squared of the model above).
rsqXY <- sumryXY$r.squared
rsqXY
## [1] 0.00735414
#print(rsqXY)
# Extract the 20 variables with the biggest influence on each alpha
# metric (the original comment said 10, but the code keeps 20).
most_correlated <- NULL
for (j in 2:11){
#for (j in 2:9){
  # Sort once per metric; the original computed the ordering twice.
  ord <- correlation_cat[order(-correlation_cat[,j]),]
  temp <- data.frame(variable = head(ord$variable, 20),
                     regression_coefficient = head(ord[,j], 20))
  names(temp)[2] <- paste("regression_coefficient", colnames(correlation_cat)[j], sep="_")
  # One (variable, coefficient) column pair per metric, bound side by
  # side — built directly, without the dummy columns the original
  # created and then deleted at the end.
  most_correlated <- if (is.null(most_correlated)) temp else cbind(most_correlated, temp)
}
### Plot "correlations" of categorical variables
# Transform correlation_cat data frame to matrix
cor_matrix_cat <- correlation_cat %>% column_to_rownames(var = "variable")
cor_matrix_cat <- data.matrix(cor_matrix_cat)
# Min-max scale each column of regression coefficients to [0, 1] so all
# metrics share one colour scale. apply() over a matrix with MARGIN = 2
# already returns a matrix here, so the original as.matrix() wrapper was
# redundant and has been dropped.
dat <- apply(cor_matrix_cat, MARGIN = 2, FUN = function(X) (X - min(X))/diff(range(X)))
# Order rows by the first column (Shannon's entropy), largest first.
dat <- dat[order(dat[,1], decreasing=TRUE),]
#dat_1 <- dat[,-6]
# Plot the top 20 rows of the "correlation" matrix
corrplot(dat[1:20,], tl.col = "black", tl.srt = 45)
#corrplot(dat_1[1:20,], tl.col = "black", tl.srt = 45)
# Scale regression coefficients from 0 to 1 and order matrix by the decreasing value of Inverse Simpson's index
#dat_2 <- dat[order(dat[,6], decreasing=TRUE),]
# Plot the "correlation" matrix
#corrplot(dat_2[1:20,], tl.col = "black", tl.srt = 45)
Interesting:
- (+++!) bmi_cat - significant for all of the alpha metrics (Overweight
< Normal)
- (+++!) milk_cheese_frequency - significant for all of the alpha
metrics (rarely < frequently)
- (?)(+++!) specialized_diet_exclude_dairy - significant for all alpha
except Strong (true < false)
- true(41), false(810)
- (?)(+++) plant_protein_frequency - significant for all alpha (Daily
<<)
- does it make sense?
- (++) salted_snacks_frequency - significant for all alpha except for
Simpson and Strong (frequent < rarely)
- (++) fermented_plant_frequency - significant for all alpha except for
chao1 (Daily <<)
- Daily(30), others(812)
- (++) bowel_movement_quality - significant for all alpha except for
Shannon and Strong (but they are on the margin of 0.05 sig lvl)
- diarrhea(73) < normal(711), diarrhea(73) <
constipation(54)
- (?)(++) race - significant for all metrics:
- Asian < Hispanic (all)
- Caucasian < Hispanic (all)
- Asian < Caucasian (all except Shannon, Simpson and Strong)
- (?)(++) non_food_allergies_poison_ivyoak - significant for all alpha
except for Simpson and Strong (true(62) < false(789))
- (?)(++) frozen_dessert_frequency - significant for all metrics except
Chao1 and Simpson (Occasionally < Rarely)
- does it make sense?
- (?)(+) prepared_meals_frequency - significant for Shannon, Simpson,
Gini and Strong (Daily(13) < Never/Rarely)
- (?)(+) seafood_frequency - significant for Chao1, Menhinick, Margalef
and Fisher (Rarely < Occasionally)
- does it make sense?
- (?)(+) non_food_alergies_unspecified - significant for Chao1,
Menhinick, Margalef and Fisher (false < true)
- other_supplement_frequency - significant for Shannon, Simpson and
Strong (Yes < No)
- sex - significant for Shannon, Simpson (m < f) and Gini (f <
m)
- age_cat - significant for Simpson and Strong (20 vs 40)
- others not significant after adjustment
- (?) specialized_diet_exclude_refined_sugars - significant for Shannon
and Simpson (true(54) < false(797))
IMPORTANT: bmi, dairy products consumption, race,
bowel_movement_quality
Check if two groups belong to the same distribution -> in an app
library(stringr)
# Keep sample id, the ten alpha metrics, and the two country columns.
cols <- c("sample_id", "shannon_entropy", "chao1", "menhinick", "margalef", "fisher_alpha", "gini_index", "pielou_evenness" , "simpson", "strong", "faith_pd","country_of_birth", "country_residence" ) ##"inverse_simpson"
healthy_countries <- all_healthy[, names(all_healthy) %in% cols]
# Country lists per continent (local name `soutrh_america` typo fixed).
europe <- c("Albania", "Austria", "Belarus", "Belgium", "Croatia", "Czech Republic", "Denmark", "France", "Germany", "Greece", "Gibraltar", "Hungary", "Ireland", "Italy", "Moldova, Republic of", "Netherlands", "Norway", "Poland", "Portugal", "Romania", "Russian Federation", "Serbia", "Slovakia", "Spain", "Sweden", "Switzerland", "United Kingdom")
south_america <- c("Argentina", "Chile", "Brazil", "Bolivia", "Colombia", "Peru", "Venezuela")
north_america <- c("Canada", "Mexico", "United States", "United States Minor Outlying Islands", "Jamaica")
asia <- c("Azerbaijan", "China", "Hong Kong", "India", "Iraq", "Japan", "Kazakhstan", "Korea, Republic of", "Lebanon", "Malaysia", "Pakistan", "Philippines", "Singapore", "Thailand", "Turkey", "United Arab Emirates", "Viet Nam", "Bangladesh")
africa <- c("Ethiopia", "Kenya", "Nigeria", "South Africa", "Tanzania, United Republic of", "Zambia")
australia_oceania <- c("Australia", "New Zealand", "Fiji")
# Recode country names to continents with vectorised %in% masks; this
# replaces twelve per-element sapply(replace(...)) calls and is
# behaviourally identical for these disjoint country sets.
continents <- list(
  "Europe"                = europe,
  "South America"         = south_america,
  "North America"         = north_america,
  "Asia"                  = asia,
  "Africa"                = africa,
  "Australia and Oceania" = australia_oceania
)
for (cont in names(continents)) {
  healthy_countries$country_of_birth[healthy_countries$country_of_birth %in% continents[[cont]]] <- cont
  healthy_countries$country_residence[healthy_countries$country_residence %in% continents[[cont]]] <- cont
}
# Group sizes after recoding to continents (birth vs. residence).
table(healthy_countries$country_of_birth)
##
## Africa Asia Australia and Oceania
## 9 40 8
## Europe North America South America
## 381 400 9
table(healthy_countries$country_residence)
##
## Africa Asia Australia and Oceania
## 1 6 2
## Europe North America South America
## 386 446 2
## Unspecified
## 4
#delete underrepresented values
# Drop continents with too few samples to compare (see the tables above).
# `%in%` replaces the original chain of `==` comparisons: it is shorter
# and treats a possible NA country as FALSE (row simply kept/dropped by
# the other conditions) instead of propagating NA into the row index.
drop_birth <- c("Africa", "Australia and Oceania", "South America")
drop_residence <- c("Africa", "Australia and Oceania", "Asia", "South America", "Unspecified")
healthy_countries_2 <- healthy_countries[
  !(healthy_countries$country_of_birth %in% drop_birth |
      healthy_countries$country_residence %in% drop_residence), ]
# Remaining group sizes after dropping underrepresented continents.
table(healthy_countries_2$country_of_birth)
##
## Asia Europe North America
## 38 373 398
table(healthy_countries_2$country_residence)
##
## Europe North America
## 372 437
# Pairwise Wilcoxon tests of every alpha metric between continent
# groups, once per grouping column, with FDR adjustment per comparison.
# NOTE(review): `compare` is used below but never defined in this file;
# from the output (3 birth groups -> 30 rows, 2 residence groups -> 10
# rows) it must be these two columns — confirm against the original Rmd.
if (!exists("compare")) {
  compare <- c("country_of_birth", "country_residence")
}
test <- vector("list", length(metric))
tables <- vector("list", length(compare))
for (j in seq_along(compare)){
  for (i in seq_along(metric)){
    # Unadjusted pairwise p-values, tidied and tagged with the metric.
    test[[i]] <- pairwise.wilcox.test(healthy_countries_2[[metric[i]]], healthy_countries_2[[compare[j]]], p.adjust.method="none") %>%
      broom::tidy() %>% add_column(parameter = metric[i], .before='group1')
  }
  tables[[j]] <- do.call(what = rbind, args = test)
  # FDR-adjust across all metrics and group pairs of this comparison,
  # then render as a flextable with significant entries in bold.
  tables[[j]] <- tables[[j]] %>%
    add_column(p.adjusted = p.adjust(tables[[j]]$p.value, "fdr"), .after='p.value') %>%
    arrange(p.value) %>%
    flextable() %>%
    bold(~ p.value < 0.05, 4) %>%
    bold(~ p.adjusted < 0.05, 5) %>%
    add_header_lines(values = "Results of the Wilcox test for distributions of different groups")
}
tables
## [[1]]
## a flextable object.
## col_keys: `parameter`, `group1`, `group2`, `p.value`, `p.adjusted`
## header has 2 row(s)
## body has 30 row(s)
## original dataset sample:
## parameter group1 group2 p.value p.adjusted
## 1 chao1 Europe Asia 0.0004144649 0.003467232
## 2 menhinick North America Europe 0.0005492065 0.003467232
## 3 margalef North America Europe 0.0005492065 0.003467232
## 4 fisher_alpha North America Europe 0.0005492065 0.003467232
## 5 faith_pd North America Europe 0.0005778720 0.003467232
##
## [[2]]
## a flextable object.
## col_keys: `parameter`, `group1`, `group2`, `p.value`, `p.adjusted`
## header has 2 row(s)
## body has 10 row(s)
## original dataset sample:
## parameter group1 group2 p.value p.adjusted
## 1 faith_pd North America Europe 2.441432e-05 0.0001468485
## 2 menhinick North America Europe 5.955079e-05 0.0001468485
## 3 margalef North America Europe 5.955079e-05 0.0001468485
## 4 fisher_alpha North America Europe 5.955079e-05 0.0001468485
## 5 chao1 North America Europe 7.342426e-05 0.0001468485
sessionInfo()
## R version 4.1.2 (2021-11-01)
## Platform: x86_64-pc-linux-gnu (64-bit)
## Running under: Ubuntu 22.04.1 LTS
##
## Matrix products: default
## BLAS: /usr/lib/x86_64-linux-gnu/blas/libblas.so.3.10.0
## LAPACK: /usr/lib/x86_64-linux-gnu/lapack/liblapack.so.3.10.0
##
## locale:
## [1] LC_CTYPE=en_US.UTF-8 LC_NUMERIC=C
## [3] LC_TIME=es_ES.UTF-8 LC_COLLATE=en_US.UTF-8
## [5] LC_MONETARY=es_ES.UTF-8 LC_MESSAGES=en_US.UTF-8
## [7] LC_PAPER=es_ES.UTF-8 LC_NAME=C
## [9] LC_ADDRESS=C LC_TELEPHONE=C
## [11] LC_MEASUREMENT=es_ES.UTF-8 LC_IDENTIFICATION=C
##
## attached base packages:
## [1] grid stats graphics grDevices utils datasets methods
## [8] base
##
## other attached packages:
## [1] stringr_1.4.1 CGPfunctions_0.6.3
## [3] RColorBrewer_1.1-3 corrplot_0.92
## [5] PerformanceAnalytics_2.0.4 xts_0.12.1
## [7] zoo_1.8-9 nortest_1.0-4
## [9] flextable_0.8.2 tibble_3.1.8
## [11] cowplot_1.1.1 plyr_1.8.7
## [13] gridExtra_2.3 ggplot2_3.4.0
## [15] dplyr_1.0.10
##
## loaded via a namespace (and not attached):
## [1] minqa_1.2.4 TH.data_1.1-0 colorspace_2.0-3
## [4] ellipsis_0.3.2 class_7.3-20 sjlabelled_1.2.0
## [7] estimability_1.4.1 base64enc_0.1-3 gld_2.6.6
## [10] rstudioapi_0.14 proxy_0.4-26 farver_2.1.1
## [13] MatrixModels_0.5-0 ggrepel_0.9.1 fansi_1.0.3
## [16] mvtnorm_1.1-3 xml2_1.3.3 codetools_0.2-18
## [19] splines_4.1.2 cachem_1.0.6 rootSolve_1.8.2.3
## [22] libcoin_1.0-9 knitr_1.40 sjmisc_2.8.9
## [25] Formula_1.2-4 jsonlite_1.8.3 nloptr_2.0.0
## [28] broom_1.0.1 compiler_4.1.2 httr_1.4.4
## [31] sjstats_0.18.2 emmeans_1.8.2 backports_1.4.1
## [34] assertthat_0.2.1 Matrix_1.4-0 fastmap_1.1.0
## [37] lazyeval_0.2.2 cli_3.4.1 ggmosaic_0.3.3
## [40] htmltools_0.5.3 tools_4.1.2 partykit_1.2-16
## [43] coda_0.19-4 gtable_0.3.1 glue_1.6.2
## [46] lmom_2.9 Rcpp_1.0.8 cellranger_1.1.0
## [49] jquerylib_0.1.4 vctrs_0.5.0 nlme_3.1-155
## [52] insight_0.18.8 inum_1.0-4 xfun_0.37
## [55] lme4_1.1-28 lifecycle_1.0.3 MASS_7.3-55
## [58] scales_1.2.1 BayesFactor_0.9.12-4.4 parallel_4.1.2
## [61] sandwich_3.0-1 expm_0.999-6 rematch2_2.1.2
## [64] yaml_2.3.6 Exact_3.2 pbapply_1.6-0
## [67] gdtools_0.2.4 sass_0.4.2 rpart_4.1.16
## [70] stringi_1.7.8 highr_0.9 paletteer_1.5.0
## [73] bayestestR_0.13.0 e1071_1.7-9 boot_1.3-28
## [76] zip_2.2.0 rlang_1.0.6 pkgconfig_2.0.3
## [79] systemfonts_1.0.4 evaluate_0.17 lattice_0.20-45
## [82] purrr_0.3.5 labeling_0.4.2 htmlwidgets_1.5.4
## [85] tidyselect_1.2.0 magrittr_2.0.3 R6_2.5.1
## [88] DescTools_0.99.47 generics_0.1.3 multcomp_1.4-18
## [91] DBI_1.1.3 pillar_1.8.1 withr_2.5.0
## [94] survival_3.2-13 datawizard_0.6.4 performance_0.10.1
## [97] modelr_0.1.9 uuid_1.1-0 utf8_1.2.2
## [100] plotly_4.10.1 rmarkdown_2.17 officer_0.4.4
## [103] readxl_1.4.1 data.table_1.14.4 forcats_0.5.2
## [106] digest_0.6.30 xtable_1.8-4 tidyr_1.2.1
## [109] munsell_0.5.0 viridisLite_0.4.1 bslib_0.4.1
## [112] quadprog_1.5-8